@InProceedings{CavallariRibePont:2018:DoCrFe,
               author = "Cavallari, Gabriel B. and Ribeiro, Leonardo S. F. and Ponti, 
                         Moacir A.",
          affiliation = "USP and USP and USP",
                title = "Unsupervised representation learning using convolutional and 
                         stacked auto-encoders: a domain and cross-domain feature space 
                         analysis",
            booktitle = "Proceedings...",
                 year = "2018",
               editor = "Ross, Arun and Gastal, Eduardo S. L. and Jorge, Joaquim A. and 
                         Queiroz, Ricardo L. de and Minetto, Rodrigo and Sarkar, Sudeep and 
                         Papa, Jo{\~a}o Paulo and Oliveira, Manuel M. and Arbel{\'a}ez, 
                         Pablo and Mery, Domingo and Oliveira, Maria Cristina Ferreira de 
                         and Spina, Thiago Vallin and Mendes, Caroline Mazetto and Costa, 
                         Henrique S{\'e}rgio Gutierrez and Mejail, Marta Estela and Geus, 
                         Klaus de and Scheer, Sergio",
         organization = "Conference on Graphics, Patterns and Images, 31. (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "Deep Learning, Representation learning, Feature extraction, 
                         Unsupervised feature learning.",
             abstract = "A feature learning task involves training models that are capable 
                         of inferring good representations (transformations of the original 
                         space) from input data alone. When working with limited or 
                         unlabelled data, and also when multiple visual domains are 
                         considered, methods that rely on large annotated datasets, such as 
                         Convolutional Neural Networks (CNNs), cannot be employed. In this 
                         paper we investigate different auto-encoder (AE) architectures, 
                         which require no labels, and explore training strategies to learn 
                         representations from images. The models are evaluated considering 
                         both the reconstruction error of the images and the feature spaces 
                          in terms of their discriminative power. We study the role of dense 
                          and convolutional layers in the results, as well as the depth and 
                          capacity of the networks, since those are shown to affect both the 
                          dimensionality reduction and the capability of generalising to 
                          different visual domains. In classification, features learned by the 
                          AEs were as discriminative as pre-trained CNN features. Our findings 
                         can be used as guidelines for the design of unsupervised 
                         representation learning methods within and across domains.",
  conference-location = "Foz do Igua{\c{c}}u, PR, Brazil",
      conference-year = "29 Oct.-1 Nov. 2018",
                  doi = "10.1109/SIBGRAPI.2018.00063",
                  url = "http://dx.doi.org/10.1109/SIBGRAPI.2018.00063",
             language = "en",
                  ibi = "8JMKD3MGPAW/3RN5TEE",
                  url = "http://urlib.net/ibi/8JMKD3MGPAW/3RN5TEE",
           targetfile = "sibgrapi-2018_Analysis_of_cross_domain_unsupervised_learning.pdf",
        urlaccessdate = "2024, May 01"
}
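
The abstract above only outlines the approach; as an illustrative aside, the
sketch below shows one possible convolutional auto-encoder of the kind it
describes: the network is trained purely on a reconstruction loss, with no
labels, and the bottleneck output can then be reused as a feature vector for a
downstream classifier. The PyTorch framework, layer sizes, 28x28 single-channel
input and hyper-parameters are assumptions made for illustration, not the
architecture evaluated in the paper.

import torch
import torch.nn as nn

class ConvAutoEncoder(nn.Module):
    """Minimal convolutional auto-encoder sketch (illustrative assumptions only)."""

    def __init__(self, latent_dim=64):
        super().__init__()
        # Encoder: two strided convolutions followed by a dense bottleneck.
        self.encoder = nn.Sequential(
            nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),   # 28x28 -> 14x14
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),  # 14x14 -> 7x7
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(32 * 7 * 7, latent_dim),                      # feature vector
        )
        # Decoder: mirrors the encoder and reconstructs the input image.
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, 32 * 7 * 7),
            nn.ReLU(),
            nn.Unflatten(1, (32, 7, 7)),
            nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2,
                               padding=1, output_padding=1),        # 7x7 -> 14x14
            nn.ReLU(),
            nn.ConvTranspose2d(16, 1, kernel_size=3, stride=2,
                               padding=1, output_padding=1),        # 14x14 -> 28x28
            nn.Sigmoid(),
        )

    def forward(self, x):
        z = self.encoder(x)              # learned representation
        return self.decoder(z), z

model = ConvAutoEncoder()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss()                 # reconstruction error, no labels needed

images = torch.rand(8, 1, 28, 28)        # stand-in for an unlabelled image batch
reconstruction, features = model(images)
loss = criterion(reconstruction, images)
loss.backward()
optimizer.step()
# `features` (8 x 64) could now feed a separate classifier to probe how
# discriminative the learned representation is, mirroring the evaluation
# described in the abstract.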

